In [ ]:
import json
import pandas as pd
import ipywidgets as widgets
from IPython.display import display, HTML, Image
In [ ]:
# Load the JSON data into a Python dictionary
json_file_path = '/home/daniarch/projects/kohya_ss/NB/json/V3_R4_lora_sdxl_prodigy_22_20240119-221944.json'
with open(json_file_path, 'r') as file:
settings_json = json.load(file)
# Display the JSON file name with larger font size
file_name = json_file_path.split('/')[-1]
display(HTML(f"<h3 style='font-size: 45px;'>{file_name}</h3>"))
# Truncate the value of 'sample_prompts' to 10 characters
if 'sample_prompts' in settings_json:
settings_json['sample_prompts'] = settings_json['sample_prompts'][:10]
# Convert to a DataFrame
settings_df = pd.DataFrame(list(settings_json.items()), columns=['Setting', 'Value'])
# Convert index to a column and reset the index
settings_df.reset_index(inplace=True)
settings_df.rename(columns={'index': 'No.'}, inplace=True)
# Splitting the DataFrame into eighths
eighth_len = len(settings_df) // 8
eighths = [settings_df.iloc[i*eighth_len:(i+1)*eighth_len] for i in range(8)]
# Handling the case where the total number of rows is not exactly divisible by 8
if len(settings_df) % 8 != 0:
eighths[-1] = pd.concat([eighths[-1], settings_df.iloc[8*eighth_len:]])
# Adjusting the display options for a compact view
pd.set_option('display.max_columns', 40)
pd.set_option('display.width', 4000)
pd.set_option('display.max_colwidth', None)
# Combining the eight parts side by side
df_display = eighths[0].reset_index(drop=True)
for i in range(1, 8):
df_display = df_display.join(eighths[i].reset_index(drop=True), rsuffix=f'_{i+1}')
# Apply styling for vertical alignment to the entire DataFrame
styled_df_full_aligned = df_display.style.set_properties(**{'vertical-align': 'top'})
# Display the styled DataFrame
display(styled_df_full_aligned)
# Define the list of important settings
important_keys = [
'additional_parameters', 'logging_dir', 'min_snr_gamma', 'network_alpha',
'network_dim', 'optimizer', 'optimizer_args', 'pretrained_model_name_or_path',
'sdxl', 'train_data_dir', 'LoRA_type', 'lr_scheduler', 'learning_rate',
'keep_tokens', 'training_comment', 'train_batch_size', 'shuffle_caption', 'max_timestep'
]
# Extract the important settings
important_settings = {k: settings_json.get(k, 'Not available') for k in important_keys}
# Convert to a DataFrame
important_settings_df = pd.DataFrame(list(important_settings.items()), columns=['Setting', 'Value'])
# Display the title for important settings
display(HTML("<h3 style='font-size: 45px;'>Important Settings</h3>"))
# Display the important settings DataFrame
display(important_settings_df)
# Define the image path
image_path = "/home/daniarch/projects/kohya_ss/NB/images_nb/V3_R4_lora_sdxl_prodigy_22.jpg"
# Extracting and displaying the image file name with larger font size
image_file_name = image_path.split('/')[-1]
display(HTML(f"<h3 style='font-size: 45px;'>{image_file_name}</h3>"))
# Display the image
display(Image(image_path))
V3_R4_lora_sdxl_prodigy_22_20240119-221944.json
| Â | No. | Setting | Value | No._2 | Setting_2 | Value_2 | No._3 | Setting_3 | Value_3 | No._4 | Setting_4 | Value_4 | No._5 | Setting_5 | Value_5 | No._6 | Setting_6 | Value_6 | No._7 | Setting_7 | Value_7 | No._8 | Setting_8 | Value_8 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 0 | LoRA_type | Standard | 15 | color_aug | False | 30 | full_fp16 | False | 45 | max_resolution | 1024,1024 | 60 | network_alpha | 32 | 75 | rank_dropout | 0 | 90 | save_state | False | 105 | unet_lr | 1.000000 |
| 1 | 1 | LyCORIS_preset | full | 16 | constrain | 0.000000 | 31 | gradient_accumulation_steps | 1 | 46 | max_timestep | 1000 | 61 | network_dim | 32 | 76 | rank_dropout_scale | True | 91 | scale_v_pred_loss_like_noise_pred | False | 106 | unit | 1 |
| 2 | 2 | adaptive_noise_scale | 0 | 17 | conv_alpha | 1 | 32 | gradient_checkpointing | True | 47 | max_token_length | 75 | 62 | network_dropout | 0 | 77 | reg_data_dir | 92 | scale_weight_norms | 0 | 107 | up_lr_weight | ||
| 3 | 3 | additional_parameters | --lr_scheduler_type "CosineAnnealingLR" --lr_scheduler_args "T_max=1000" | 18 | conv_block_alphas | 33 | keep_tokens | 1 | 48 | max_train_epochs | 63 | no_token_padding | False | 78 | rescaled | False | 93 | sdxl | True | 108 | use_cp | False | ||
| 4 | 4 | block_alphas | 19 | conv_block_dims | 34 | learning_rate | 1.000000 | 49 | max_train_steps | 1000 | 64 | noise_offset | 0 | 79 | resume | 94 | sdxl_cache_text_encoder_outputs | False | 109 | use_scalar | False | |||
| 5 | 5 | block_dims | 20 | conv_dim | 1 | 35 | logging_dir | /home/daniarch/projects/kohya_ss/V3_R4/log | 50 | mem_eff_attn | False | 65 | noise_offset_type | Original | 80 | sample_every_n_epochs | 5 | 95 | sdxl_no_half_vae | True | 110 | use_tucker | False | |
| 6 | 6 | block_lr_zero_threshold | 21 | debiased_estimation_loss | False | 36 | lora_network_weights | 51 | mid_lr_weight | 66 | num_cpu_threads_per_process | 2 | 81 | sample_every_n_steps | 0 | 96 | seed | 1337 | 111 | use_wandb | True | |||
| 7 | 7 | bucket_no_upscale | True | 22 | decompose_both | False | 37 | lr_scheduler | constant | 52 | min_bucket_reso | 256 | 67 | optimizer | Prodigy | 82 | sample_prompts | a hand hol | 97 | shuffle_caption | True | 112 | v2 | False |
| 8 | 8 | bucket_reso_steps | 64 | 23 | dim_from_weights | False | 38 | lr_scheduler_args | 53 | min_snr_gamma | 5 | 68 | optimizer_args | decouple=True weight_decay=0.45 d_coef=2 use_bias_correction=True safeguard_warmup=True betas=0.9,0.99 | 83 | sample_sampler | dpm_2 | 98 | stop_text_encoder_training_pct | 0 | 113 | v_parameterization | False | |
| 9 | 9 | cache_latents | True | 24 | down_lr_weight | 39 | lr_scheduler_num_cycles | 54 | min_timestep | 0 | 69 | output_dir | /home/daniarch/projects/kohya_ss/V3_R4/model | 84 | save_every_n_epochs | 1 | 99 | text_encoder_lr | 1.000000 | 114 | v_pred_like_loss | 0 | ||
| 10 | 10 | cache_latents_to_disk | True | 25 | enable_bucket | True | 40 | lr_scheduler_power | 55 | mixed_precision | bf16 | 70 | output_name | V3_R2_lora_sdxl_prodigy_22 | 85 | save_every_n_steps | 0 | 100 | train_batch_size | 4 | 115 | vae | ||
| 11 | 11 | caption_dropout_every_n_epochs | 0.000000 | 26 | epoch | 0 | 41 | lr_warmup | 0 | 56 | model_list | custom | 71 | persistent_data_loader_workers | False | 86 | save_last_n_steps | 0 | 101 | train_data_dir | /home/daniarch/projects/kohya_ss/V3_R4/img | 116 | vae_batch_size | 0 |
| 12 | 12 | caption_dropout_rate | 0 | 27 | factor | -1 | 42 | max_bucket_reso | 2048 | 57 | module_dropout | 0 | 72 | pretrained_model_name_or_path | /home/daniarch/miniconda3/envs/autoLDM/stable-diffusion-webui/models/Stable-diffusion/sd_xl_base_1.0.safetensors | 87 | save_last_n_steps_state | 0 | 102 | train_norm | True | 117 | wandb_api_key | 647fd09ca3b2d990b3cf95aa7598f849b02fb9c8 |
| 13 | 13 | caption_extension | .txt | 28 | flip_aug | False | 43 | max_data_loader_n_workers | 0 | 58 | multires_noise_discount | 0 | 73 | prior_loss_weight | 1.000000 | 88 | save_model_as | safetensors | 103 | train_on_input | True | 118 | weighted_captions | False |
| 14 | 14 | clip_skip | 1 | 29 | full_bf16 | False | 44 | max_grad_norm | 1 | 59 | multires_noise_iterations | 0 | 74 | random_crop | False | 89 | save_precision | bf16 | 104 | training_comment | 4 repeat | 119 | xformers | xformers |
Important Settings
| Setting | Value | |
|---|---|---|
| 0 | additional_parameters | --lr_scheduler_type "CosineAnnealingLR" --lr_scheduler_args "T_max=1000" |
| 1 | logging_dir | /home/daniarch/projects/kohya_ss/V3_R4/log |
| 2 | min_snr_gamma | 5 |
| 3 | network_alpha | 32 |
| 4 | network_dim | 32 |
| 5 | optimizer | Prodigy |
| 6 | optimizer_args | decouple=True weight_decay=0.45 d_coef=2 use_bias_correction=True safeguard_warmup=True betas=0.9,0.99 |
| 7 | pretrained_model_name_or_path | /home/daniarch/miniconda3/envs/autoLDM/stable-diffusion-webui/models/Stable-diffusion/sd_xl_base_1.0.safetensors |
| 8 | sdxl | True |
| 9 | train_data_dir | /home/daniarch/projects/kohya_ss/V3_R4/img |
| 10 | LoRA_type | Standard |
| 11 | lr_scheduler | constant |
| 12 | learning_rate | 1.0 |
| 13 | keep_tokens | 1 |
| 14 | training_comment | 4 repeat |
| 15 | train_batch_size | 4 |
| 16 | shuffle_caption | True |
| 17 | max_timestep | 1000 |